from __future__ import absolute_import, division, print_function, unicode_literals
!pip install tf-nightly-gpu-2.0-preview
!pip install matplotlib
!pip install scipy
import tensorflow as tf
import os
import numpy as np
import matplotlib.pyplot as plt
tf.__version__
import os
from PIL import Image
# Directories containing the two pest classes whose images are validated below.
MAIN_PATHS = ['./images/bemisia tabaci', './images/tetranychus urticae']

broken_images = 0  # files that failed validation and were deleted
images = 0         # regular files examined

for path in MAIN_PATHS:
    for image in os.listdir(path):
        filename = os.path.join(path, image)
        # Skip sub-directories and OS metadata entries (e.g. .DS_Store);
        # only regular files can be image samples, and calling os.remove()
        # on a directory inside the except block would itself raise.
        if not os.path.isfile(filename):
            continue
        images += 1
        try:
            # verify() detects truncated/corrupt headers but leaves the image
            # object unusable, so the file is reopened for the decode test.
            with Image.open(filename) as im:
                im.verify()
            # Force a full pixel decode: transpose() loads all image data and
            # raises on defects that verify() does not catch.
            with Image.open(filename) as im:
                im.transpose(Image.FLIP_LEFT_RIGHT)
        except Exception as e:
            # Corrupt sample: report and delete it so Keras' directory
            # iterator does not crash on it during training.
            print(filename, e)
            try:
                os.remove(filename)
            except OSError as remove_error:
                print('could not remove', filename, remove_error)
            broken_images += 1
#os.remove('./images/tetranychus urticae/238.Foto-121.jpg')
print(images, broken_images)
# Root folder scanned by flow_from_directory(); each sub-directory is a class.
base_dir = './images/'
print(os.listdir(base_dir))
#import shutil
#shutil.rmtree('./images/.DS_Store')

# Input resolution expected by MobileNetV2, and the mini-batch size used by
# both the training and validation generators.
IMAGE_SIZE = 224
BATCH_SIZE = 64
# Single augmentation/preprocessing pipeline shared by both subsets;
# validation_split reserves 20% of the images for validation.
datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1. / 255,
    validation_split=0.2,
    horizontal_flip=True,
    vertical_flip=True,
    rotation_range=360,
)

# Both generators read the same directory tree with identical geometry;
# only the subset differs.
flow_kwargs = dict(target_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE)
train_generator = datagen.flow_from_directory(base_dir, subset='training', **flow_kwargs)
val_generator = datagen.flow_from_directory(base_dir, subset='validation', **flow_kwargs)
# Pull one batch to sanity-check the generator's output shapes.
image_batch, label_batch = next(iter(train_generator))
image_batch.shape, label_batch.shape

print(train_generator.class_indices)

# Persist the class names (one per line, alphabetical) for later inference.
labels = '\n'.join(sorted(train_generator.class_indices.keys()))
with open('labels.txt', 'w') as f:
    f.write(labels)
IMG_SHAPE = (IMAGE_SIZE, IMAGE_SIZE, 3)

# Pre-trained MobileNetV2 feature extractor (ImageNet weights, classifier
# head removed). Frozen so only the new head below is trained.
base_model = tf.keras.applications.MobileNetV2(
    input_shape=IMG_SHAPE,
    include_top=False,
    weights='imagenet',
)
base_model.trainable = False

# Small convolutional head on top of the frozen backbone, ending in a
# 2-way softmax — one unit per pest class.
model = tf.keras.Sequential([
    base_model,
    tf.keras.layers.Conv2D(64, 3, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Conv2D(32, 3, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(2, activation='softmax'),
])

model.compile(
    optimizer=tf.keras.optimizers.Adam(),
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)
model.summary()
print('Number of trainable variables = {}'.format(len(model.trainable_variables)))

# Train only the head for a few epochs, validating on the held-out split.
epochs = 5
history = model.fit(train_generator, epochs=epochs, validation_data=val_generator)
# Plot the accuracy and loss curves recorded during training.
train_acc = history.history['accuracy']
valid_acc = history.history['val_accuracy']
train_loss = history.history['loss']
valid_loss = history.history['val_loss']

plt.figure(figsize=(8, 8))

# Top panel: accuracy per epoch.
plt.subplot(2, 1, 1)
plt.plot(train_acc, label='Training Accuracy')
plt.plot(valid_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()), 1])
plt.title('Training and Validation Accuracy')

# Bottom panel: cross-entropy loss per epoch.
plt.subplot(2, 1, 2)
plt.plot(train_loss, label='Training Loss')
plt.plot(valid_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0, 1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
#probabilities = model.predict_generator(val_generator)
#print(probabilities)

# Per-file evaluation of the validation split: visit every sample exactly
# once (the generator cycles forever), recording overall and per-class hits.
current_labels = ['bemisia tabaci', 'tetranychus urticae']
total_samples = 0
correct_samples = 0
total_index_0 = 0
correct_index_0 = 0
total_index_1 = 0
correct_index_1 = 0
wrong_samples = []
correct_samples_list = []
filenames = val_generator.filenames
used_indexes = []
done = False
while not done:
    # NOTE(review): index_generator and _get_batches_of_transformed_samples
    # are private Keras APIs, used here because they expose which file
    # indices make up a batch — the public iterator does not.
    index = next(val_generator.index_generator)
    image, label = val_generator._get_batches_of_transformed_samples(index)
    predictions = model.predict(image)
    for i in range(predictions.shape[0]):
        if index[i] in used_indexes:
            # The generator cycles, so repeats must be skipped.
            continue
        used_indexes.append(index[i])
        true_class = int(np.argmax(label[i]))
        true_name = current_labels[true_class]
        pred_name = current_labels[np.argmax(predictions[i])]
        print(filenames[index[i]], true_name, pred_name)
        total_samples += 1
        is_correct = true_name == pred_name
        if is_correct:
            correct_samples += 1
            correct_samples_list.append(filenames[index[i]])
        else:
            wrong_samples.append(filenames[index[i]])
        if true_class == 0:
            total_index_0 += 1
            if is_correct:
                correct_index_0 += 1
        elif true_class == 1:
            total_index_1 += 1
            if is_correct:
                correct_index_1 += 1
    # Fix: the original set a `should_stop` flag that was never written and
    # relied on a `break` that (with the lost indentation) could only exit
    # the inner for-loop, letting the while-loop spin forever. Terminate
    # once every validation file has been scored.
    if len(used_indexes) == len(filenames):
        done = True


def _ratio(correct, total):
    """Fraction correct/total, or NaN when a class has no samples
    (avoids the original's ZeroDivisionError)."""
    return correct / total if total else float('nan')


print(total_samples, _ratio(correct_samples, total_samples),
      total_index_0, _ratio(correct_index_0, total_index_0),
      total_index_1, _ratio(correct_index_1, total_index_1))
print(wrong_samples)
print(wrong_samples)
from IPython.display import Image as ji, display
for sample in wrong_samples:
print(sample)
display(ji(filename='./images/' + sample))
#input("Press Enter to continue...")
#ji(filename='./images/' + wrong_samples[14])
for sample in correct_samples_list:
print(sample)
display(ji(filename='./images/' + sample))